# init repo notebook
!git clone https://github.com/rramosp/ppdl.git > /dev/null 2> /dev/null
!mv -n ppdl/content/init.py ppdl/content/local . 2> /dev/null
!pip install -r ppdl/content/requirements.txt > /dev/null
TF symbolic engine#
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import pandas as pd
%matplotlib inline
%load_ext tensorboard
from local.lib import mlutils
from IPython.display import Image
tf.__version__
'2.2.0'
TF is a symbolic computing + optimization library for machine learning problems#
ML expressions involve:
variables representing data as n-dimensional objects
variables representing parameters as n-dimensional objects
mostly matrix operations (multiplications, convolutions, etc.)
some non-linear operations (activation functions)
Recall that in sympy we FIRST define expressions (a computational graph) and THEN we evaluate them by feeding in concrete values.
Tensorflow INTEGRATES both aspects so that building computational graphs LOOKS LIKE writing regular Python code as much as possible.
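For contrast, here is a minimal sympy sketch of that define-first, evaluate-later workflow (an illustration added here, not part of the course code; it assumes sympy is installed):
import sympy as sp
# FIRST: build a purely symbolic expression (no values involved yet)
sx, sy = sp.symbols("x y")
sf = sx**2 + sy**3
# THEN: evaluate it by feeding in concrete values
print(sf.subs({sx: 7, sy: 9}))   # 778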
a tf.Variable represents a symbolic variable that also contains a value.
See:
https://www.tensorflow.org/guide/keras/train_and_evaluate
https://www.tensorflow.org/guide/keras/custom_layers_and_models
https://www.tensorflow.org/guide/keras/customizing_what_happens_in_fit
x = tf.Variable(initial_value=[7], name="x", dtype=tf.float32)
y = tf.Variable(initial_value=[9], name="y", dtype=tf.float32)
f = x**2+y**3
f
<tf.Tensor: shape=(1,), dtype=float32, numpy=array([778.], dtype=float32)>
f is a SYMBOLIC EXPRESSION (a Tensor in TF terms) that also has a value attached to it: the expression evaluated at the current variable values, 7**2 + 9**3 = 49 + 729 = 778.
TF can obtain gradients of such expressions automatically. Obtaining the gradient this way (with GradientTape) might seem rather awkward, but the goal is that you write regular Python code and TF takes care of building the computational graph from it.
with tf.GradientTape(persistent=True) as t:
    f = x**2 + y**3
print (t.gradient(f, x), t.gradient(f, y))
tf.Tensor([14.], shape=(1,), dtype=float32) tf.Tensor([243.], shape=(1,), dtype=float32)
print (t.gradient(f, [x,y]))
[<tf.Tensor: shape=(1,), dtype=float32, numpy=array([14.], dtype=float32)>, <tf.Tensor: shape=(1,), dtype=float32, numpy=array([243.], dtype=float32)>]
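As a quick sanity check (added here for illustration), these values match the analytic derivatives df/dx = 2x and df/dy = 3y**2 evaluated at x=7, y=9:
print(2*x.numpy(), 3*y.numpy()**2)   # [14.] [243.]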
usually expressions are built within functions decorated with @tf.function for performance
@tf.function
def myf(x,y):
    return x**2 + y**3
with tf.GradientTape(persistent=True) as t:
    f = myf(x,y)
print (t.gradient(f, x), t.gradient(f, y))
tf.Tensor([14.], shape=(1,), dtype=float32) tf.Tensor([243.], shape=(1,), dtype=float32)
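myf can of course also be called directly: the first call traces the Python function into a graph, and later calls with compatible inputs reuse it. An illustrative call (not in the original notebook):
print(myf(tf.constant([2.]), tf.constant([3.])))   # tf.Tensor([31.], shape=(1,), dtype=float32)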
!rm -rf logs
mlutils.make_graph(myf, x, y, logdir="logs")
<tensorflow.python.eager.def_function.Function object at 0x7f82b0317850>
%tensorboard --logdir logs
Tensors#
in Tensorflow the notion of a Tensor is just a symbolic multidimensional array. This is a recent, simplified version of what has long been known as a tensor in differential geometry (see https://bjlkeng.github.io/posts/tensors-tensors-tensors/).
Observe how Tensorflow naturally deals with multidimensional symbolic variables (Tensors)
n = 3
X = tf.Variable(initial_value=[[2, 6], [3, 1], [4, 5]], name="X", dtype=tf.float32)
w = tf.Variable(initial_value=[[-2],[1]], name="w", dtype=tf.float32)
y = tf.Variable(initial_value=[[8],[2],[3]], name="y", dtype=tf.float32)
with tf.GradientTape(persistent=True) as t:
    f = tf.reduce_mean((tf.matmul(X,w)-y)**2)
g = t.gradient(f, w)
g
<tf.Tensor: shape=(2, 1), dtype=float32, numpy=
array([[-38. ],
[-48.666668]], dtype=float32)>
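This agrees with the closed-form gradient of the mean squared error, 2/n * X.T (Xw - y). A numpy verification sketch (added for illustration, not in the original notebook):
# closed-form gradient of mean((Xw - y)**2) with respect to w
Xn, wn, yn = X.numpy(), w.numpy(), y.numpy()
print(2/len(Xn) * Xn.T.dot(Xn.dot(wn) - yn))   # [[-38.] [-48.666668]]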
But a tf.Tensor is always a symbolic variable. In order to reconcile the symbolic and execution worlds, Tensorflow attaches a value to each symbolic variable and carries it forward when computing derivatives.
X, y and w are Tensors that we defined with specific values. g is a Tensor derived from X, y and w that has ALSO been evaluated with the corresponding values.
g
<tf.Tensor: shape=(2, 1), dtype=float32, numpy=
array([[-38. ],
[-48.666668]], dtype=float32)>
g.numpy()
array([[-38. ],
[-48.666668]], dtype=float32)